Pkg.update()
using NeuralNetDiffEq
using Base.Test
using Plots; plotly()
using DiffEqBase, ParameterizedFunctions
using DiffEqProblemLibrary, DiffEqDevTools
using Knet
INFO: Recompiling stale cache file /home/akshay/.julia/lib/v0.5/NeuralNetDiffEq.ji for module NeuralNetDiffEq.
# RHS of the scalar test ODE u' = 1.01u, in the Julia 0.5-era (t, u) signature.
linear = (t,u) -> (1.01*u)
# Register the exact solution u(t) = u0*exp(1.01t) via the Val{:analytic}
# overload, so solve/test_convergence can report errors against it.
(f::typeof(linear))(::Type{Val{:analytic}},t,u0) = u0*exp(1.01*t)
prob = ODEProblem(linear,1/2,(0.0,1.0))
sol = solve(prob,nnode(10),dt=1/10,iterations=10)
#dts = 1./2.^(8:-1:2)
#sim_linear = test_convergence(dts,prob,nnode(10),iterations=5000)
WARNING: The finite difference methods from Calculus.jl no longer extend Base.gradient and should be called as Calculus.gradient instead. This usage is deprecated.
5.033621 seconds (3.83 M allocations: 165.436 MB, 0.97% gc time)
DiffEqBase.ODESolution{Any,1,Array{Array{Any,1},1},Array{Float64,1},Dict{Symbol,Float64},LinSpace{Float64},Array{Any,1},DiffEqBase.ODEProblem{Float64,Float64,false,##1#2,Void,UniformScaling{Int64}},NeuralNetDiffEq.nnode,NeuralNetDiffEq.NeuralNetworkInterpolation{Array{Function,1},Array{Any,1}}}(Array{Any,1}[Any[0.5],Any[0.556397],Any[0.621489],Any[0.695964],Any[0.780491],Any[0.875708],Any[0.982203],Any[1.1005],Any[1.23106],Any[1.37424]],[0.5,0.559381,0.625814,0.700136,0.783285,0.876309,0.980381,1.09681,1.22707,1.3728],Dict(:l∞=>0.00432485,:final=>0.00143758,:L∞=>0.00443078,:L2=>0.00311934,:l2=>0.00296859),linspace(0.0,1.0,10),Any[],DiffEqBase.ODEProblem{Float64,Float64,false,##1#2,Void,UniformScaling{Int64}}(#1,0.5,(0.0,1.0),nothing,UniformScaling{Int64} 1*I),NeuralNetDiffEq.nnode([10]),NeuralNetDiffEq.NeuralNetworkInterpolation{Array{Function,1},Array{Any,1}}(Function[NeuralNetDiffEq.u],Any[ [0.808409; 0.979205; … ; 0.809723; 0.809013], [-1.27095; -1.49675; … ; -1.27289; -1.27186], [0.239789 0.213398 … 0.239695 0.239714]]),true,0,:Success)
plot(sol,plot_analytic=true)
sol(0.232)
1-element Array{Any,1}: 0.627656
# Linear test ODE with time-dependent coefficient:
# u' = t^3 + 2t + t^2*(1+3t^2)/(1+t+t^3) - u*(t + (1+3t^2)/(1+t+t^3)).
f = (t,u) -> (t^3 + 2*t + (t^2)*((1+3*(t^2))/(1+t+(t^3))) - u*(t + ((1+3*(t^2))/(1+t+t^3))))
# Exact solution u(t) = u0*exp(-t^2/2)/(1+t+t^3) + t^2, attached for error reporting.
(::typeof(f))(::Type{Val{:analytic}},t,u0) = u0*exp(-(t^2)/2)/(1+t+t^3) + t^2
prob2 = ODEProblem(f,1.0,(0.0,1.0))
sol2 = solve(prob2,nnode(10),dt=0.1,iterations=200)
(:iteration,100,:loss,0.004084245564297921) (:iteration,200,:loss,0.003965820781884261) 5.604121 seconds (21.01 M allocations: 1000.742 MB, 3.64% gc time)
DiffEqBase.ODESolution{Any,1,Array{Array{Any,1},1},Array{Float64,1},Dict{Symbol,Float64},LinSpace{Float64},Array{Any,1},DiffEqBase.ODEProblem{Float64,Float64,false,##3#4,Void,UniformScaling{Int64}},NeuralNetDiffEq.nnode,NeuralNetDiffEq.NeuralNetworkInterpolation{Array{Function,1},Array{Any,1}}}(Array{Any,1}[Any[1.0],Any[0.904191],Any[0.836905],Any[0.797362],Any[0.785546],Any[0.80229],Any[0.849234],Any[0.928605],Any[1.04275],Any[1.19349]],[1.0,0.905704,0.840507,0.801406,0.788794,0.80487,0.852367,0.933629,1.05009,1.20218],Dict(:l∞=>0.00868422,:final=>0.00868422,:L∞=>0.00868422,:L2=>0.00442772,:l2=>0.00461719),linspace(0.0,1.0,10),Any[],DiffEqBase.ODEProblem{Float64,Float64,false,##3#4,Void,UniformScaling{Int64}}(#3,1.0,(0.0,1.0),nothing,UniformScaling{Int64} 1*I),NeuralNetDiffEq.nnode([10]),NeuralNetDiffEq.NeuralNetworkInterpolation{Array{Function,1},Array{Any,1}}(Function[NeuralNetDiffEq.u],Any[ [2.58477; 2.60085; … ; -1.30027; -1.29989], [-3.20789; -3.19851; … ; -1.07264; -1.099], [0.464153 0.469281 … -0.918577 -0.897459]]),true,0,:Success)
plot(sol2,plot_analytic=true)
# Damped-oscillator source problem: u' = -u/5 + exp(-t/5)*cos(t).
f2 = (t,u) -> (-u/5 + exp(-t/5).*cos(t))
# Exact solution u(t) = exp(-t/5)*(u0 + sin(t)), attached for error reporting.
(::typeof(f2))(::Type{Val{:analytic}},t,u0) = exp(-t/5)*(u0 + sin(t))
prob3 = ODEProblem(f2,Float32(0.0),(Float32(0.0),Float32(2.0)))
sol3 = solve(prob3,nnode(10),dt=0.2,iterations=1000)
(:iteration,100,:loss,0.001351321f0) (:iteration,200,:loss,0.0011618182f0) (:iteration,300,:loss,0.000106007246f0) (:iteration,400,:loss,0.0008162024f0) (:iteration,500,:loss,0.0005109266f0) (:iteration,600,:loss,6.947068f-6) (:iteration,700,:loss,0.00014337225f0) (:iteration,800,:loss,0.00024213137f0) (:iteration,900,:loss,0.00011455115f0) (:iteration,1000,:loss,0.000105658386f0) 31.870545 seconds (107.09 M allocations: 4.870 GB, 3.23% gc time)
DiffEqBase.ODESolution{Any,1,Array{Array{Any,1},1},Array{Float32,1},Dict{Symbol,Float32},LinSpace{Float32},Array{Any,1},DiffEqBase.ODEProblem{Float32,Float32,false,##5#6,Void,UniformScaling{Int64}},NeuralNetDiffEq.nnode,NeuralNetDiffEq.NeuralNetworkInterpolation{Array{Function,1},Array{Any,1}}}(Array{Any,1}[Any[0.0],Any[0.210167],Any[0.392674],Any[0.53995],Any[0.648107],Any[0.715306],Any[0.741437],Any[0.728599],Any[0.68104],Any[0.604167]],Float32[0.0,0.210817,0.393387,0.541181,0.649923,0.717615,0.744435,0.732547,0.685826,0.60952],Dict(:l∞=>0.00535357,:final=>0.00535357,:L∞=>0.00535357,:L2=>0.00284577,:l2=>0.00295334),linspace(0.0f0,2.0f0,10),Any[],DiffEqBase.ODEProblem{Float32,Float32,false,##5#6,Void,UniformScaling{Int64}}(#5,0.0f0,(0.0f0,2.0f0),nothing,UniformScaling{Int64} 1*I),NeuralNetDiffEq.nnode([10]),NeuralNetDiffEq.NeuralNetworkInterpolation{Array{Function,1},Array{Any,1}}(Function[NeuralNetDiffEq.u],Any[ Float32[-3.85678; -2.08065; … ; -1.35389; -3.56415], Float32[-3.58593; -0.295715; … ; 1.89219; -8.08466], Float32[-2.36302 0.302651 … 1.05729 -1.73183]]),true,0,:Success)
plot(sol3,plot_analytic=true)
sol3([0.721])
# Coupled two-component test system. Returns the derivatives as a 1x2 row
# matrix [du1 du2] (hcat), matching how the rest of the session consumes it.
function SODE_2(t,u)
    s = sin(t)
    rhs1 = cos(t) + u[1]^2 + u[2] - (1 + t^2 + s^2)
    rhs2 = 2*t - (1 + t^2)*s + u[1]*u[2]
    return [rhs1 rhs2]
end
SODE_2 (generic function with 1 method)
prob4 = ODEProblem(SODE_2,Float32[0.0,1.0],(Float32(0.0),Float32(3.0)))
DiffEqBase.ODEProblem{Array{Float32,1},Float32,false,#SODE_2,Void,UniformScaling{Int64}}(SODE_2,Float32[0.0,1.0],(0.0f0,3.0f0),nothing,UniformScaling{Int64} 1*I)
sol4 = solve(prob4,nnode([32,64]),dt=0.3,iterations=10000)
(:iteration,100,:loss,17.378231f0) (:iteration,200,:loss,0.47261918f0) (:iteration,300,:loss,0.18454815f0) (:iteration,400,:loss,0.030971492f0) (:iteration,500,:loss,0.38109463f0) (:iteration,600,:loss,0.4615858f0) (:iteration,700,:loss,0.29755196f0) (:iteration,800,:loss,0.24676056f0) (:iteration,900,:loss,0.1285587f0) (:iteration,1000,:loss,0.104656056f0) (:iteration,1100,:loss,0.0738853f0) (:iteration,1200,:loss,0.04995005f0) (:iteration,1300,:loss,0.03526501f0) (:iteration,1400,:loss,0.042126857f0) (:iteration,1500,:loss,0.025359638f0) (:iteration,1600,:loss,0.016927922f0) (:iteration,1700,:loss,0.0053606164f0) (:iteration,1800,:loss,0.060100272f0) (:iteration,1900,:loss,0.052357633f0) (:iteration,2000,:loss,0.04675577f0) (:iteration,2100,:loss,0.0011256242f0) (:iteration,2200,:loss,0.02318639f0) (:iteration,2300,:loss,0.05054543f0) (:iteration,2400,:loss,0.007044795f0) (:iteration,2500,:loss,0.024661614f0) (:iteration,2600,:loss,0.04080418f0) (:iteration,2700,:loss,0.053645357f0) (:iteration,2800,:loss,0.05328556f0) (:iteration,2900,:loss,0.049578153f0) (:iteration,3000,:loss,0.051114455f0) (:iteration,3100,:loss,0.010174504f0) (:iteration,3200,:loss,0.03178819f0) (:iteration,3300,:loss,0.012082014f0) (:iteration,3400,:loss,0.043708f0) (:iteration,3500,:loss,0.010928344f0) (:iteration,3600,:loss,0.04992518f0) (:iteration,3700,:loss,0.047260806f0) (:iteration,3800,:loss,0.05280282f0) (:iteration,3900,:loss,0.033523507f0) (:iteration,4000,:loss,0.02261108f0) (:iteration,4100,:loss,0.015800763f0) (:iteration,4200,:loss,0.049049973f0) (:iteration,4300,:loss,0.018776141f0) (:iteration,4400,:loss,0.04851945f0) (:iteration,4500,:loss,0.017102791f0) (:iteration,4600,:loss,0.023749972f0) (:iteration,4700,:loss,0.016840965f0) (:iteration,4800,:loss,0.025703821f0) (:iteration,4900,:loss,0.008277815f0) (:iteration,5000,:loss,0.006664231f0) (:iteration,5100,:loss,0.05378253f0) (:iteration,5200,:loss,0.0017224314f0) (:iteration,5300,:loss,0.0057390844f0) 
(:iteration,5400,:loss,0.042182785f0) (:iteration,5500,:loss,0.045584403f0) (:iteration,5600,:loss,0.026695276f0) (:iteration,5700,:loss,0.053972594f0) (:iteration,5800,:loss,0.004734574f0) (:iteration,5900,:loss,0.047237825f0) (:iteration,6000,:loss,0.028686453f0) (:iteration,6100,:loss,0.006065891f0) (:iteration,6200,:loss,0.030160412f0) (:iteration,6300,:loss,0.0023183245f0) (:iteration,6400,:loss,0.003218229f0) (:iteration,6500,:loss,0.03134954f0) (:iteration,6600,:loss,0.0050108437f0) (:iteration,6700,:loss,0.008754095f0) (:iteration,6800,:loss,0.018124115f0) (:iteration,6900,:loss,0.05131719f0) (:iteration,7000,:loss,0.0048302608f0) (:iteration,7100,:loss,0.009146246f0) (:iteration,7200,:loss,0.0070275813f0) (:iteration,7300,:loss,0.007308378f0) (:iteration,7400,:loss,0.035546955f0) (:iteration,7500,:loss,0.0019529471f0) (:iteration,7600,:loss,0.033570457f0) (:iteration,7700,:loss,0.007381956f0) (:iteration,7800,:loss,0.0367698f0) (:iteration,7900,:loss,0.011833419f0) (:iteration,8000,:loss,0.006623143f0) (:iteration,8100,:loss,0.009890006f0) (:iteration,8200,:loss,0.011293914f0) (:iteration,8300,:loss,0.038492292f0) (:iteration,8400,:loss,0.014266875f0) (:iteration,8500,:loss,0.004241224f0) (:iteration,8600,:loss,0.019556765f0) (:iteration,8700,:loss,0.011798006f0) (:iteration,8800,:loss,0.04367275f0) (:iteration,8900,:loss,0.041603822f0) (:iteration,9000,:loss,0.03468025f0) (:iteration,9100,:loss,0.0493725f0) (:iteration,9200,:loss,0.007875723f0) (:iteration,9300,:loss,0.031809688f0) (:iteration,9400,:loss,0.019739421f0) (:iteration,9500,:loss,0.024047706f0) (:iteration,9600,:loss,0.027570084f0) (:iteration,9700,:loss,0.021775201f0) (:iteration,9800,:loss,0.0047423546f0) (:iteration,9900,:loss,0.0039643794f0) (:iteration,10000,:loss,0.027088964f0) 1591.210794 seconds (4.23 G allocations: 347.342 GB, 4.85% gc time)
DiffEqBase.ODESolution{Any,2,Array{Array{Any,1},1},Void,Void,LinSpace{Float32},Array{Any,1},DiffEqBase.ODEProblem{Array{Float32,1},Float32,false,#SODE_2,Void,UniformScaling{Int64}},NeuralNetDiffEq.nnode,NeuralNetDiffEq.NeuralNetworkInterpolation{Array{Function,1},Array{Any,1}}}(Array{Any,1}[Any[0.0,1.0],Any[1.14148,-0.165008],Any[1.39813,-0.0605747],Any[1.79644,-0.0365672],Any[2.06487,0.0970011],Any[2.37757,0.0788738],Any[2.74986,-0.0465902],Any[3.02348,-0.493596],Any[3.33901,-1.06463],Any[3.69554,-1.58016]],nothing,nothing,linspace(0.0f0,3.0f0,10),Any[],DiffEqBase.ODEProblem{Array{Float32,1},Float32,false,#SODE_2,Void,UniformScaling{Int64}}(SODE_2,Float32[0.0,1.0],(0.0f0,3.0f0),nothing,UniformScaling{Int64} 1*I),NeuralNetDiffEq.nnode([32,64]),NeuralNetDiffEq.NeuralNetworkInterpolation{Array{Function,1},Array{Any,1}}(Function[NeuralNetDiffEq.u,NeuralNetDiffEq.u],Any[ Float32[-4.21356; -10.0707; … ; -4.27393; -4.50126], Float32[-10.5982; -9.49084; … ; -10.7843; -10.9054], Float32[-2.56834 -35.6895 … 0.978135 -0.693274; -10.1001 -67.5135 … -3.44434 -3.64688; … ; -1.06202 1.92709 … 0.48208 -1.10875; -7.27969 -42.5179 … -3.75603 -2.27306], Float32[-2.37571; -1.15281; … ; -4.33154; -3.00871], Float32[0.404967 -9.74056 … 0.781577 -1.25019; -13.1748 11.6971 … -12.8059 11.394]]),true,0,:Success)
plot(sol4)
linspace(0.0,5.0,4.0)
# Decoupled two-component test system: component 1 is the damped-oscillator
# source problem, component 2 is plain exponential decay. Returns a
# 2-element Vector [du1, du2].
function SODE_2(t,u)
    decay = exp(-t/5)
    first_rhs = decay*cos(t) - u[1]/5
    second_rhs = -u[2]
    return [first_rhs, second_rhs]
end
prob5 = ODEProblem(SODE_2,Float32[0.0,0.0],(Float32(0.0),Float32(2.0)))
sol5 = solve(prob5,nnode(10),dt=0.2,iterations=1000)
plot(sol5)
# Lotka-Volterra predator-prey RHS (prey growth 1.5, predation rate 1.0,
# predator death 3, conversion 1). Returns a 1x2 row matrix [du1 du2].
# NOTE(review): "voltera" is a misspelling of Volterra, but the name is
# referenced later in the session, so it is kept unchanged.
function lotka_voltera_tf(t,u)
    prey, pred = u[1], u[2]
    dprey = 1.5 .* prey - 1.0 .* prey .* pred
    dpred = prey .* pred - 3 .* pred
    return [dprey dpred]
end
prob4 = ODEProblem(lotka_voltera_tf,Float32[1.0,1.0],(Float32(0.0),Float32(10.0)))
sol4 = solve(prob4,nnode(10),dt=1,iterations=100)
f = prob4.f
trial_sols = Array{Function}(2)
for i = 1:2
phi(P,t) = u0[i] + (t .- t0).*predict(P,t)[1]
trial_sols[i] = phi
end
for trial_sol in trial_sols
F2 = f(0.5,trial_sol(P,0.5))
end
for trial_sol in trial_sols
println(trial_sol(P,0.5))
end
# Evaluate the two per-component trial solutions at (P, t) and hcat the
# results into a 1x2 row matrix.
function get_trial_sols(trial_sols,P,t)
    first_component = trial_sols[1](P, t)
    second_component = trial_sols[2](P, t)
    return [first_component second_component]
end
u0 = prob4.u0
t0 = prob4.tspan[1]
sumabs2([gradient(x->get_trial_sols(trial_sols,P,x),t) .- f(0.5,get_trial_sols(trial_sols,P,t)) for t in timepoints])
f(0.5,get_trial_sols(trial_sols,P,0.5))
gradient(x->get_trial_sols(trial_sols,P,x),0.5)
X = [gradient(x->get_trial_sols(trial_sols,P,x),t) .- f(t,get_trial_sols(trial_sols,P,t)) for t in timepoints]
sum(sumabs2(X))
phi(P,t) = u0 + (t .- t0).*predict(P,t)[1]
# Initialize parameters for a single-hidden-layer network of width `hl_width`
# with scalar input and a linear output layer (no output bias).
#
# Returns a 3-element Vector{Any}:
#   P[1] : hl_width x 1 input-weight matrix (standard-normal draws)
#   P[2] : hl_width x 1 hidden-bias matrix (zeros)
#   P[3] : 1 x hl_width output-weight matrix (standard-normal draws)
#
# `ftype` selects the element type (e.g. Float32). Draws use the global RNG,
# so results are not reproducible unless it is seeded.
function init_params(ftype,hl_width)
    # Vector literal replaces the uninitialized `Array{Any}(3)` constructor,
    # whose syntax is deprecated/removed in later Julia versions.
    P = Any[randn(ftype, hl_width, 1),
            zeros(ftype, hl_width, 1),
            randn(ftype, 1, hl_width)]
    return P
end
P = init_params(Float32,10)
# Forward pass of the one-hidden-layer network: sigmoid hidden layer (Knet's
# `sigm`, applied elementwise) followed by a linear output layer.
function predict(P,x)
    input_w, bias, output_w = P[1], P[2], P[3]
    hidden = sigm(input_w * x .+ bias)
    return output_w * hidden
end
timepoints = linspace(0,10,10)
A = Array{Float32}(2)
A[1] = Float32(0.1045)
A[2] = Float32(0.5674)
A + 4
phi(P,2)
p(P,t) = (t .- t0).*predict(P,t)[1]
f = prob.f
w,b,v = P
Q = x->phi(P,x)
gradient(Q,0.5) .- f(0.5,phi(P,0.5))
f(0.5,phi(P,0.5))
size(phi(P,0.5))
lotka_voltera_tf(0.5,)
using ForwardDiff
sig(x) = 1/(1+exp(-x))
sigder(x) = sigm(x)*(1-sigm(x))
# Same forward pass as `predict`, but with the (x, P) argument order so it
# can be closed over and differentiated with respect to the input.
function predict_nn(x,P)
    weights_in, bias, weights_out = P[1], P[2], P[3]
    activations = sigm(weights_in * x .+ bias)
    return weights_out * activations
end
dN_dt(t) = sum([v[i]*w[i]*sigder(w[i]*t .+ b[i]) for i = 1:hl_width])
gradient(t->predict_nn(w,t),0.65)
# Initialize parameters for a single-hidden-layer network of width `hl_width`
# with scalar input and a linear output layer (no output bias).
#
# Returns a 3-element Vector{Any}:
#   P[1] : hl_width x 1 input-weight matrix (standard-normal draws)
#   P[2] : hl_width x 1 hidden-bias matrix (zeros)
#   P[3] : 1 x hl_width output-weight matrix (standard-normal draws)
#
# `ftype` selects the element type (e.g. Float32). Draws use the global RNG,
# so results are not reproducible unless it is seeded.
function init_params(ftype,hl_width)
    # Vector literal replaces the uninitialized `Array{Any}(3)` constructor,
    # whose syntax is deprecated/removed in later Julia versions.
    P = Any[randn(ftype, hl_width, 1),
            zeros(ftype, hl_width, 1),
            randn(ftype, 1, hl_width)]
    return P
end
P = init_params(Float32,10)
w,b,v = P
hl_width = 10
gradient(sig,0.65)
sigder(0.65)
gradient(x->predict(P,x),0.65)
# Network output v * sig.(w*t + b), using the hand-rolled `sig` broadcast
# over the hidden pre-activations (explicit-parameter form of predict).
function pred(t,v,w,b)
    preactivation = w*t + b
    return v * sig.(preactivation)
end
gradient(x->pred(x,v,w,b),0.65)
dN_dt(0.65)
pred(0.65,v,w,b)
P
w1, b1, w2 = P
h = sig.(w1 * 0.65 + b1)
w2 * h
using Knet
predict_nn(P,0.65)
# Trial solution phi(t) = u0 + (t - t0)*N(t); equals u0 at t = t0 by
# construction. Reads the globals u0 and t0.
# NOTE(review): predict_nn was defined as predict_nn(x, P); calling it here
# as predict_nn(P, t) swaps the argument order — confirm this is intended.
phi(P,t) = u0 + (t-t0)*predict_nn(P,t)
# Hand-derived d(phi)/dt = N(t) + (t - t0)*dN/dt, compared below against
# gradient(x -> phi(P, x), .) at sample points.
dPhi_dt(t) = predict_nn(P,t)+(t-t0)*dN_dt(t)
t0 = 0
u0 = 1/2
dPhi_dt(0.003)
gradient(x->phi(P,x),0.003)
gradient(x->predict_nn(P,x),0.65)
for i in (0.05,0.06,0.5,0.96,1.04,1.66,1.977,2.5,3.5)
println(:GradDifference ,gradient(x->phi(P,x),i) - dPhi_dt(i))
end
dN_dt(0.65)
gradient(x->predict_nn(x,P),0.65)
# Fixed syntax error: the iteration keyword `in` (or `=`) was missing
# between the loop variable and the range.
for i in 1:10
    println(i)
end
phi2(P,t) = u0[1] + (t .- t0).*predict(P,t)[1]
A = Array{Function}(2)
A[1] = phi2
A[1](P,0.5)